Per-vcpu IO evtchn patch for HVM domain.
authorkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Thu, 23 Feb 2006 10:22:25 +0000 (11:22 +0100)
committerkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Thu, 23 Feb 2006 10:22:25 +0000 (11:22 +0100)
We are starting to send patches to support SMP VMX guests.

Signed-off-by: Xin Li <xin.b.li@intel.com>
21 files changed:
tools/ioemu/target-i386-dm/helper2.c
tools/ioemu/vl.c
tools/libxc/xc_hvm_build.c
tools/libxc/xenguest.h
tools/python/xen/lowlevel/xc/xc.c
tools/python/xen/xend/image.py
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/intercept.c
xen/arch/x86/hvm/io.c
xen/arch/x86/hvm/platform.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/svm/vmcb.c
xen/arch/x86/hvm/vlapic.c
xen/arch/x86/hvm/vmx/io.c
xen/arch/x86/hvm/vmx/vmcs.c
xen/arch/x86/hvm/vmx/vmx.c
xen/common/event_channel.c
xen/include/asm-x86/hvm/io.h
xen/include/asm-x86/hvm/support.h
xen/include/public/hvm/ioreq.h
xen/include/xen/event.h

index 94f10df4a3e84edb0ee59fc7f64b2ca8cdf20e68..bc6bc0a12f67e0981a9069dd782821c10bf6499f 100644 (file)
@@ -125,9 +125,8 @@ target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
 //the evtchn fd for polling
 int evtchn_fd = -1;
 
-//the evtchn port for polling the notification,
-//should be inputed as bochs's parameter
-evtchn_port_t ioreq_remote_port, ioreq_local_port;
+//which vcpu we are serving
+int send_vcpu = 0;
 
 //some functions to handle the io req packet
 void sp_info()
@@ -135,52 +134,62 @@ void sp_info()
     ioreq_t *req;
     int i;
 
-    term_printf("event port: %d\n", shared_page->sp_global.eport);
     for ( i = 0; i < vcpus; i++ ) {
         req = &(shared_page->vcpu_iodata[i].vp_ioreq);
-        term_printf("vcpu %d:\n", i);
+        term_printf("vcpu %d: event port %d\n",
+                    i, shared_page->vcpu_iodata[i].vp_eport);
         term_printf("  req state: %x, pvalid: %x, addr: %llx, "
                     "data: %llx, count: %llx, size: %llx\n",
                     req->state, req->pdata_valid, req->addr,
                     req->u.data, req->count, req->size);
+        term_printf("  IO totally occurred on this vcpu: %llx\n",
+                    req->io_count);
     }
 }
 
 //get the ioreq packets from share mem
-ioreq_t* __cpu_get_ioreq(void)
+static ioreq_t* __cpu_get_ioreq(int vcpu)
 {
     ioreq_t *req;
 
-    req = &(shared_page->vcpu_iodata[0].vp_ioreq);
-    if (req->state == STATE_IOREQ_READY) {
-        req->state = STATE_IOREQ_INPROCESS;
-    } else {
-        fprintf(logfile, "False I/O request ... in-service already: "
-                         "%x, pvalid: %x, port: %llx, "
-                         "data: %llx, count: %llx, size: %llx\n",
-                         req->state, req->pdata_valid, req->addr,
-                         req->u.data, req->count, req->size);
-        req = NULL;
-    }
+    req = &(shared_page->vcpu_iodata[vcpu].vp_ioreq);
+
+    if ( req->state == STATE_IOREQ_READY )
+        return req;
 
-    return req;
+    fprintf(logfile, "False I/O request ... in-service already: "
+                     "%x, pvalid: %x, port: %llx, "
+                     "data: %llx, count: %llx, size: %llx\n",
+                     req->state, req->pdata_valid, req->addr,
+                     req->u.data, req->count, req->size);
+    return NULL;
 }
 
 //use poll to get the port notification
 //ioreq_vec--out,the
 //retval--the number of ioreq packet
-ioreq_t* cpu_get_ioreq(void)
+static ioreq_t* cpu_get_ioreq(void)
 {
-    int rc;
+    int i, rc;
     evtchn_port_t port;
 
     rc = read(evtchn_fd, &port, sizeof(port));
-    if ((rc == sizeof(port)) && (port == ioreq_local_port)) {
+    if ( rc == sizeof(port) ) {
+        for ( i = 0; i < vcpus; i++ )
+            if ( shared_page->vcpu_iodata[i].dm_eport == port )
+                break;
+
+        if ( i == vcpus ) {
+            fprintf(logfile, "Fatal error while trying to get io event!\n");
+            exit(1);
+        }
+
         // unmask the wanted port again
-        write(evtchn_fd, &ioreq_local_port, sizeof(port));
+        write(evtchn_fd, &port, sizeof(port));
 
         //get the io packet from shared memory
-        return __cpu_get_ioreq();
+        send_vcpu = i;
+        return __cpu_get_ioreq(i);
     }
 
     //read error or read nothing
@@ -361,6 +370,8 @@ void cpu_handle_ioreq(CPUState *env)
     ioreq_t *req = cpu_get_ioreq();
 
     if (req) {
+        req->state = STATE_IOREQ_INPROCESS;
+
         if ((!req->pdata_valid) && (req->dir == IOREQ_WRITE)) {
             if (req->size != 4)
                 req->u.data &= (1UL << (8 * req->size))-1;
@@ -465,7 +476,7 @@ int main_loop(void)
             struct ioctl_evtchn_notify notify;
 
             env->send_event = 0;
-            notify.port = ioreq_local_port;
+            notify.port = shared_page->vcpu_iodata[send_vcpu].dm_eport;
             (void)ioctl(evtchn_fd, IOCTL_EVTCHN_NOTIFY, &notify);
         }
     }
@@ -488,7 +499,7 @@ CPUState * cpu_init()
 {
     CPUX86State *env;
     struct ioctl_evtchn_bind_interdomain bind;
-    int rc;
+    int i, rc;
 
     cpu_exec_init();
     qemu_register_reset(qemu_hvm_reset, NULL);
@@ -509,14 +520,17 @@ CPUState * cpu_init()
         return NULL;
     }
 
+    /* FIXME: how about if we overflow the page here? */
     bind.remote_domain = domid;
-    bind.remote_port   = ioreq_remote_port;
-    rc = ioctl(evtchn_fd, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
-    if (rc == -1) {
-        fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
-        return NULL;
+    for ( i = 0; i < vcpus; i++ ) {
+        bind.remote_port = shared_page->vcpu_iodata[i].vp_eport;
+        rc = ioctl(evtchn_fd, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
+        if ( rc == -1 ) {
+            fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
+            return NULL;
+        }
+        shared_page->vcpu_iodata[i].dm_eport = rc;
     }
-    ioreq_local_port = rc;
 
     return env;
 }
index 6b32f286f0da4fe21a637fa62c257e373af9ca0c..c8794f132d969e98b7789ad066da34a3ed998614 100644 (file)
@@ -2337,7 +2337,6 @@ enum {
 
     QEMU_OPTION_S,
     QEMU_OPTION_s,
-    QEMU_OPTION_p,
     QEMU_OPTION_d,
     QEMU_OPTION_l,
     QEMU_OPTION_hdachs,
@@ -2414,7 +2413,6 @@ const QEMUOption qemu_options[] = {
 
     { "S", 0, QEMU_OPTION_S },
     { "s", 0, QEMU_OPTION_s },
-    { "p", HAS_ARG, QEMU_OPTION_p },
     { "d", HAS_ARG, QEMU_OPTION_d },
     { "l", HAS_ARG, QEMU_OPTION_l },
     { "hdachs", HAS_ARG, QEMU_OPTION_hdachs },
@@ -2938,13 +2936,6 @@ int main(int argc, char **argv)
                     fprintf(logfile, "domid: %d\n", domid);
                 }
                 break;
-            case QEMU_OPTION_p:
-                {
-                    extern evtchn_port_t ioreq_remote_port;
-                    ioreq_remote_port = atoi(optarg);
-                    fprintf(logfile, "eport: %d\n", ioreq_remote_port);
-                }
-                break;
             case QEMU_OPTION_l:
                 {
                     int mask;
index b993e51cfab0712412fcf3040eaedb4bf0958876..1af50c6e816896378f80233f89893c974eda17e9 100644 (file)
@@ -175,7 +175,6 @@ static int setup_guest(int xc_handle,
                        unsigned long nr_pages,
                        vcpu_guest_context_t *ctxt,
                        unsigned long shared_info_frame,
-                       unsigned int control_evtchn,
                        unsigned int vcpus,
                        unsigned int pae,
                        unsigned int acpi,
@@ -284,7 +283,19 @@ static int setup_guest(int xc_handle,
          shared_page_frame)) == 0 )
         goto error_out;
     memset(sp, 0, PAGE_SIZE);
-    sp->sp_global.eport = control_evtchn;
+
+    /* FIXME: how about if we overflow the page here? */
+    for ( i = 0; i < vcpus; i++ ) {
+        unsigned int vp_eport;
+
+        vp_eport = xc_evtchn_alloc_unbound(xc_handle, dom, 0);
+        if ( vp_eport < 0 ) {
+            fprintf(stderr, "Couldn't get unbound port from VMX guest.\n");
+            goto error_out;
+        }
+        sp->vcpu_iodata[i].vp_eport = vp_eport;
+    }
+
     munmap(sp, PAGE_SIZE);
 
     *store_mfn = page_array[(v_end >> PAGE_SHIFT) - 2];
@@ -331,7 +342,6 @@ int xc_hvm_build(int xc_handle,
                  uint32_t domid,
                  int memsize,
                  const char *image_name,
-                 unsigned int control_evtchn,
                  unsigned int vcpus,
                  unsigned int pae,
                  unsigned int acpi,
@@ -388,7 +398,7 @@ int xc_hvm_build(int xc_handle,
 
     ctxt->flags = VGCF_HVM_GUEST;
     if ( setup_guest(xc_handle, domid, memsize, image, image_size, nr_pages,
-                     ctxt, op.u.getdomaininfo.shared_info_frame, control_evtchn,
+                     ctxt, op.u.getdomaininfo.shared_info_frame,
                      vcpus, pae, acpi, apic, store_evtchn, store_mfn) < 0)
     {
         ERROR("Error constructing guest OS");
index c612eeb2e70ebaaa0451f09f90c06f9dcab443a4..f0e72d1e4c7b82f358b8788ad839d3273abf4e8e 100644 (file)
@@ -57,7 +57,6 @@ int xc_hvm_build(int xc_handle,
                  uint32_t domid,
                  int memsize,
                  const char *image_name,
-                 unsigned int control_evtchn,
                  unsigned int vcpus,
                  unsigned int pae,
                  unsigned int acpi,
index 5e6d98587b2a03fb249bf453283b8edf202b1ed7..cc56d8530e67a823f0c22aebacebf91325f9a185 100644 (file)
@@ -363,7 +363,7 @@ static PyObject *pyxc_hvm_build(XcObject *self,
 {
     uint32_t dom;
     char *image;
-    int control_evtchn, store_evtchn;
+    int store_evtchn;
     int memsize;
     int vcpus = 1;
     int pae  = 0;
@@ -371,15 +371,15 @@ static PyObject *pyxc_hvm_build(XcObject *self,
     int apic = 0;
     unsigned long store_mfn = 0;
 
-    static char *kwd_list[] = { "dom", "control_evtchn", "store_evtchn",
+    static char *kwd_list[] = { "dom", "store_evtchn",
                                "memsize", "image", "vcpus", "pae", "acpi", "apic",
                                NULL };
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiiisiiii", kwd_list,
-                                      &dom, &control_evtchn, &store_evtchn,
-                                     &memsize, &image, &vcpus, &pae, &acpi, &apic) )
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiisiiii", kwd_list,
+                                      &dom, &store_evtchn, &memsize,
+                                      &image, &vcpus, &pae, &acpi, &apic) )
         return NULL;
 
-    if ( xc_hvm_build(self->xc_handle, dom, memsize, image, control_evtchn,
+    if ( xc_hvm_build(self->xc_handle, dom, memsize, image,
                      vcpus, pae, acpi, apic, store_evtchn, &store_mfn) != 0 )
         return PyErr_SetFromErrno(xc_error);
 
index fb5661718809905e13a26dbb500800569a21b43a..5e3f3eec8fa0946b7554315e4ba83d7e4f0e8cb2 100644 (file)
@@ -205,7 +205,6 @@ class HVMImageHandler(ImageHandler):
                         ("image/device-model", self.device_model),
                         ("image/display", self.display))
 
-        self.device_channel = None
         self.pid = 0
 
         self.dmargs += self.configVNC(imageConfig)
@@ -216,16 +215,10 @@ class HVMImageHandler(ImageHandler):
         self.apic = int(sxp.child_value(imageConfig, 'apic', 0))
 
     def buildDomain(self):
-        # Create an event channel
-        self.device_channel = xc.evtchn_alloc_unbound(dom=self.vm.getDomid(),
-                                                      remote_dom=0)
-        log.info("HVM device model port: %d", self.device_channel)
-
         store_evtchn = self.vm.getStorePort()
 
         log.debug("dom            = %d", self.vm.getDomid())
         log.debug("image          = %s", self.kernel)
-        log.debug("control_evtchn = %d", self.device_channel)
         log.debug("store_evtchn   = %d", store_evtchn)
         log.debug("memsize        = %d", self.vm.getMemoryTarget() / 1024)
         log.debug("vcpus          = %d", self.vm.getVCpuCount())
@@ -237,7 +230,6 @@ class HVMImageHandler(ImageHandler):
 
         return xc.hvm_build(dom            = self.vm.getDomid(),
                             image          = self.kernel,
-                            control_evtchn = self.device_channel,
                             store_evtchn   = store_evtchn,
                             memsize        = self.vm.getMemoryTarget() / 1024,
                             vcpus          = self.vm.getVCpuCount(),
@@ -345,7 +337,6 @@ class HVMImageHandler(ImageHandler):
         if len(vnc):
             args = args + vnc
         args = args + ([ "-d",  "%d" % self.vm.getDomid(),
-                  "-p", "%d" % self.device_channel,
                   "-m", "%s" % (self.vm.getMemoryTarget() / 1024)])
         args = args + self.dmargs
         env = dict(os.environ)
index b76e5328cc2f2f9fc1dd229823511f637786e034..cff7af1382a81282ef06aa3ef7ba0266970c927d 100644 (file)
@@ -124,11 +124,6 @@ static void hvm_map_io_shared_page(struct domain *d)
         domain_crash_synchronous();
     }
     d->arch.hvm_domain.shared_page_va = (unsigned long)p;
-
-    HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x\n", iopacket_port(d));
-
-    clear_bit(iopacket_port(d),
-              &d->shared_info->evtchn_mask[0]);
 }
 
 static int validate_hvm_info(struct hvm_info_table *t)
index bc3d91f0935e5b5750ab54b6b757d13c1d53dceb..529e1c45d7c94d64b346b0d310361ec6a31de218 100644 (file)
@@ -332,8 +332,8 @@ int intercept_pit_io(ioreq_t *p)
 void hlt_timer_fn(void *data)
 {
     struct vcpu *v = data;
-    
-    evtchn_set_pending(v, iopacket_port(v->domain));
+
+    evtchn_set_pending(v, iopacket_port(v));
 }
 
 static __inline__ void missed_ticks(struct hvm_virpit*vpit)
index 39d53cccd5a51ffb758026d5e59fbf0a90ce4aac..26339a0a7493a1e8e69aaebb1792e32137518f59 100644 (file)
@@ -697,8 +697,8 @@ void hvm_io_assist(struct vcpu *v)
 void hvm_wait_io(void)
 {
     struct vcpu *v = current;
-    struct domain *d = v->domain;    
-    int port = iopacket_port(d);
+    struct domain *d = v->domain;
+    int port = iopacket_port(v);
 
     for ( ; ; )
     {
@@ -729,8 +729,8 @@ void hvm_wait_io(void)
 void hvm_safe_block(void)
 {
     struct vcpu *v = current;
-    struct domain *d = v->domain;    
-    int port = iopacket_port(d);
+    struct domain *d = v->domain;
+    int port = iopacket_port(v);
 
     for ( ; ; )
     {
index ce944ea47ba4bb0548f38559962f546d0aca90fe..4ae448ba398dac2c9d8ec7b235d811c7a33c2372 100644 (file)
@@ -42,8 +42,6 @@
 #define DECODE_success  1
 #define DECODE_failure  0
 
-extern long evtchn_send(int lport);
-
 #if defined (__x86_64__)
 static inline long __get_reg_value(unsigned long reg, int size)
 {
@@ -648,6 +646,8 @@ void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
     p->count = count;
     p->df = regs->eflags & EF_DF ? 1 : 0;
 
+    p->io_count++;
+
     if (pvalid) {
         if (hvm_paging_enabled(current))
             p->u.pdata = (void *) gva_to_gpa(value);
@@ -664,18 +664,18 @@ void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
 
     p->state = STATE_IOREQ_READY;
 
-    evtchn_send(iopacket_port(v->domain));
+    evtchn_send(iopacket_port(v));
     hvm_wait_io();
 }
 
-void send_mmio_req(unsigned char type, unsigned long gpa,
-                   unsigned long count, int size, long value, int dir, int pvalid)
+void send_mmio_req(
+    unsigned char type, unsigned long gpa,
+    unsigned long count, int size, long value, int dir, int pvalid)
 {
     struct vcpu *v = current;
     vcpu_iodata_t *vio;
     ioreq_t *p;
     struct cpu_user_regs *regs;
-    extern long evtchn_send(int lport);
 
     regs = current->arch.hvm_vcpu.mmio_op.inst_decoder_regs;
 
@@ -702,6 +702,8 @@ void send_mmio_req(unsigned char type, unsigned long gpa,
     p->count = count;
     p->df = regs->eflags & EF_DF ? 1 : 0;
 
+    p->io_count++;
+
     if (pvalid) {
         if (hvm_paging_enabled(v))
             p->u.pdata = (void *) gva_to_gpa(value);
@@ -718,7 +720,7 @@ void send_mmio_req(unsigned char type, unsigned long gpa,
 
     p->state = STATE_IOREQ_READY;
 
-    evtchn_send(iopacket_port(v->domain));
+    evtchn_send(iopacket_port(v));
     hvm_wait_io();
 }
 
index 1a085e7989a1a497b8009c77e199ee8a96af06c8..ec949aecc63b2074ace4501c2ba32aaad443683a 100644 (file)
@@ -64,7 +64,6 @@ static unsigned long trace_values[NR_CPUS][4];
 /* 
  * External functions, etc. We should move these to some suitable header file(s) */
 
-extern long evtchn_send(int lport);
 extern void do_nmi(struct cpu_user_regs *, unsigned long);
 extern int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip,
                                 int inst_len);
index 4796adb0268847831f870a09b4cb18f542f2824e..0fa4040e90f30d9808cca9a9be84c3722c5457dc 100644 (file)
@@ -421,6 +421,18 @@ void svm_do_launch(struct vcpu *v)
     if (v->vcpu_id == 0)
         hvm_setup_platform(v->domain);
 
+    if ( evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0 )
+    {
+        printk("HVM domain bind port %d to vcpu %d failed!\n",
+               iopacket_port(v), v->vcpu_id);
+        domain_crash_synchronous();
+    }
+
+    HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x", iopacket_port(v));
+
+    clear_bit(iopacket_port(v),
+              &v->domain->shared_info->evtchn_mask[0]);
+
     if (hvm_apic_support(v->domain))
         vlapic_init(v);
     init_timer(&v->arch.hvm_svm.hlt_timer,
@@ -490,7 +502,7 @@ void svm_do_resume(struct vcpu *v)
 
     svm_stts(v);
 
-    if ( test_bit(iopacket_port(d), &d->shared_info->evtchn_pending[0]) ||
+    if ( test_bit(iopacket_port(v), &d->shared_info->evtchn_pending[0]) ||
          test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
         hvm_wait_io();
 
index 0c4e2e35af9440b63a3a645e9077b46f2aae2186..156e7fa68c6c1a05debf79604da0eba4b62dc4e2 100644 (file)
@@ -210,7 +210,7 @@ static int vlapic_accept_irq(struct vcpu *v, int delivery_mode,
                 set_bit(vector, &vlapic->tmr[0]);
             }
         }
-        evtchn_set_pending(vlapic->vcpu, iopacket_port(vlapic->domain));
+        evtchn_set_pending(vlapic->vcpu, iopacket_port(vlapic->vcpu));
         result = 1;
         break;
 
@@ -834,7 +834,7 @@ void vlapic_timer_fn(void *data)
         }
         else
             vlapic->intr_pending_count[vlapic_lvt_vector(vlapic, VLAPIC_LVT_TIMER)]++;
-        evtchn_set_pending(vlapic->vcpu, iopacket_port(vlapic->domain));
+        evtchn_set_pending(vlapic->vcpu, iopacket_port(vlapic->vcpu));
     }
 
     vlapic->timer_current_update = NOW();
index b7a27f904a88e586271f5e9f00981334953d4ad2..f836994d2efd29015a05f8745d4932631a74984f 100644 (file)
@@ -178,7 +178,7 @@ void vmx_do_resume(struct vcpu *v)
 
     vmx_stts();
 
-    if ( test_bit(iopacket_port(d), &d->shared_info->evtchn_pending[0]) ||
+    if ( test_bit(iopacket_port(v), &d->shared_info->evtchn_pending[0]) ||
          test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
         hvm_wait_io();
 
index 5fee564d97659d5778d2c5af7a78660237955bd0..9c904e35ba7eb7729fdb251dde89b853d6f04ba7 100644 (file)
@@ -200,6 +200,18 @@ static void vmx_do_launch(struct vcpu *v)
     if (v->vcpu_id == 0)
         hvm_setup_platform(v->domain);
 
+    if ( evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0 )
+    {
+        printk("VMX domain bind port %d to vcpu %d failed!\n",
+               iopacket_port(v), v->vcpu_id);
+        domain_crash_synchronous();
+    }
+
+    HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x", iopacket_port(v));
+
+    clear_bit(iopacket_port(v),
+              &v->domain->shared_info->evtchn_mask[0]);
+
     __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );
 
     error |= __vmwrite(GUEST_CR0, cr0);
index d0403161ef7b117c36c8d9b4e8493d389bd38b69..699563a0f2466b9536b5731dd87ad0928b9c3452 100644 (file)
@@ -448,7 +448,6 @@ unsigned long vmx_get_ctrl_reg(struct vcpu *v, unsigned int num)
     return 0;                   /* dummy */
 }
 
-extern long evtchn_send(int lport);
 void do_nmi(struct cpu_user_regs *);
 
 static int check_vmx_controls(ctrls, msr)
index 0f1cc274ded8963a6f5d168047eb58266ed4e082..ea250b426d658ee930e714a58cab7f8b2eda3c23 100644 (file)
@@ -399,7 +399,7 @@ static long evtchn_close(evtchn_close_t *close)
 }
 
 
-long evtchn_send(int lport)
+long evtchn_send(unsigned int lport)
 {
     struct evtchn *lchn, *rchn;
     struct domain *ld = current->domain, *rd;
@@ -508,15 +508,13 @@ static long evtchn_status(evtchn_status_t *status)
     return rc;
 }
 
-static long evtchn_bind_vcpu(evtchn_bind_vcpu_t *bind) 
+long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
 {
-    struct domain *d    = current->domain;
-    int            port = bind->port;
-    int            vcpu = bind->vcpu;
+    struct domain *d = current->domain;
     struct evtchn *chn;
     long           rc = 0;
 
-    if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu] == NULL) )
+    if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
         return -ENOENT;
 
     spin_lock(&d->evtchn_lock);
@@ -533,7 +531,7 @@ static long evtchn_bind_vcpu(evtchn_bind_vcpu_t *bind)
     case ECS_UNBOUND:
     case ECS_INTERDOMAIN:
     case ECS_PIRQ:
-        chn->notify_vcpu_id = vcpu;
+        chn->notify_vcpu_id = vcpu_id;
         break;
     default:
         rc = -EINVAL;
@@ -638,7 +636,7 @@ long do_event_channel_op(struct evtchn_op *uop)
         break;
 
     case EVTCHNOP_bind_vcpu:
-        rc = evtchn_bind_vcpu(&op.u.bind_vcpu);
+        rc = evtchn_bind_vcpu(op.u.bind_vcpu.port, op.u.bind_vcpu.vcpu);
         break;
 
     case EVTCHNOP_unmask:
index 577e9660775c3b1b04cc1e008e134e5d7560bff3..46698d055cec1d55c3fedf737cfc3ba4ea2e4a66 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/hvm/vpic.h>
 #include <asm/hvm/vioapic.h>
 #include <public/hvm/ioreq.h>
+#include <public/event_channel.h>
 
 #define MAX_OPERAND_NUM 2
 
index 3507e75024c50783f2f201df6d2d529ac6c7567f..8c4d6df69a553f8c9a0e82a0e72fb3fd8d710811 100644 (file)
@@ -40,9 +40,9 @@ static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
     return &get_sp(d)->vcpu_iodata[cpu];
 }
 
-static inline int iopacket_port(struct domain *d)
+static inline int iopacket_port(struct vcpu *v)
 {
-    return get_sp(d)->sp_global.eport;
+    return get_vio(v->domain, v->vcpu_id)->vp_eport;
 }
 
 /* XXX these are really VMX specific */
index 308ac39c9a6fc6c2408b0b0fa3046432ea58e29a..c93f908dd68b3a71fca91475424306d87776552f 100644 (file)
@@ -53,6 +53,7 @@ typedef struct {
     uint8_t dir:1;          /*  1=read, 0=write             */
     uint8_t df:1;
     uint8_t type;           /* I/O type                     */
+    uint64_t io_count;      /* How many IO done on a vcpu   */
 } ioreq_t;
 
 #define MAX_VECTOR      256
@@ -65,11 +66,13 @@ typedef struct {
     uint16_t    pic_irr;
     uint16_t    pic_last_irr;
     uint16_t    pic_clear_irr;
-    int         eport; /* Event channel port */
 } global_iodata_t;
 
 typedef struct {
-    ioreq_t     vp_ioreq;
+    ioreq_t         vp_ioreq;
+    /* Event channel port */
+    unsigned long   vp_eport;   /* VMX vcpu uses this to notify DM */
+    unsigned long   dm_eport;   /* DM uses this to notify VMX vcpu */
 } vcpu_iodata_t;
 
 typedef struct {
index b3c7713679cb0a11ef041b1f339cd935ad81cace..2e0c2712cc52d17438b2c685fedea46a9b4515d0 100644 (file)
@@ -63,4 +63,10 @@ extern void send_guest_pirq(struct domain *d, int pirq);
     (!!(v)->vcpu_info->evtchn_upcall_pending &  \
       !(v)->vcpu_info->evtchn_upcall_mask)
 
+/* Send a notification from a local event-channel port. */
+extern long evtchn_send(unsigned int lport);
+
+/* Bind a local event-channel port to the specified VCPU. */
+extern long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id);
+
 #endif /* __XEN_EVENT_H__ */